# Load the raw experiment data; readr infers column types
# (the inferred specification is printed below).
d <- read_csv(file = data_path)
##
## ── Column specification ────────────────────────────────────────────────────────
## cols(
## .default = col_character(),
## trial_index = col_double(),
## time_elapsed = col_double(),
## experiment_id = col_double(),
## survey_code = col_logical(),
## seed = col_double(),
## success = col_logical(),
## timeout = col_logical(),
## rt = col_double(),
## start_time = col_double(),
## end_time = col_double(),
## choice_index = col_double(),
## reward_score = col_double(),
## reward = col_double(),
## reward_score_unadjusted = col_double(),
## score_after_trial = col_double(),
## slider_start = col_double(),
## n = col_double()
## )
## ℹ Use `spec()` for the full column specifications.
# Drop the reward-feedback screens; they carry no choice data.
# %in% (rather than ==) keeps rows whose trial_type is NA.
d <- d %>%
  filter(!trial_type %in% "show-reward")
Adding columns to characterize participant choices.
# Add trial-indexing columns that characterize participant choices.
d <- d %>%
mutate(
# Collapse the raw jsPsych trial_index into consecutive trial numbers:
# indices < 8 map to themselves; indices 8-198 advance the counter by
# one for every two raw indices (alternating screens); indices >= 199
# subtract 96 to keep the counter continuous.
trial_number = case_when(
trial_index<8 ~ trial_index,
trial_index<199 ~ 7+(trial_index-7)/2,
TRUE ~ trial_index-96
)
) %>%
relocate(trial_number,.after=trial_index) %>%
mutate(
# 1-96 on the main test trials (trial_number 7-102); NA elsewhere.
test_trial_number = case_when(
trial_number<7 ~ NA_real_,
trial_number<103 ~ trial_number-6,
TRUE ~ NA_real_
)
) %>%
relocate(test_trial_number,.after=trial_number) %>%
mutate(
# Trial position within each 48-trial block, plus a centered copy
# (mean of 1..48 is 24.5) for use as a regression predictor.
# Rows with NA test_trial_number propagate NA through the arithmetic.
block_trial_number = case_when(
test_trial_number < 49 ~ test_trial_number,
TRUE ~ test_trial_number - 48),
block_trial_number_c = block_trial_number - 24.5
) %>%
relocate(block_trial_number,.after=test_trial_number) %>%
relocate(block_trial_number_c,.after=block_trial_number) %>%
mutate(
# 8-trial exploration blocks (1-12) across the 96 test trials;
# NA for non-test trials.
explore_block = case_when(
test_trial_number<9 ~ 1,
test_trial_number<17 ~ 2,
test_trial_number<25 ~ 3,
test_trial_number<33 ~ 4,
test_trial_number < 41 ~ 5,
test_trial_number < 49 ~ 6,
test_trial_number < 57 ~ 7,
test_trial_number<65 ~ 8,
test_trial_number<73 ~ 9,
test_trial_number<81 ~ 10,
test_trial_number <89 ~ 11,
test_trial_number <97 ~ 12,
TRUE ~ NA_real_
)
) %>%
mutate(
# 1 if the participant picked the option with the maximum unadjusted
# reward (8), 0 for any other choice on a test trial, NA otherwise.
max_reward_choice = case_when(
reward_score_unadjusted ==8 ~ 1,
!is.na(test_trial_number) ~ 0,
TRUE ~ NA_real_
)
) %>%
mutate(
# Structure condition in effect on the current trial: in the second
# block (test trial >= 49) mismatch participants switch structures
# (emotion <-> model); match participants keep their assigned one.
cur_structure_condition=case_when(
test_trial_number < 49 ~ structure_condition,
!is.na(test_trial_number) & match_condition == "match" ~ structure_condition,
test_trial_number >= 49 & structure_condition == "emotion" ~ "model",
test_trial_number >= 49 & structure_condition == "model" ~ "emotion"
)
) %>%
# Block 1 = learning (test trials 1-48), block 2 = generalization;
# NA for non-test trials.
mutate(block = case_when(
test_trial_number < 49 ~ 1,
test_trial_number >= 49 ~ 2,
TRUE ~ NA_real_
))
#recenter vars
# Effect- and dummy-coded versions of the design variables for modeling.
d <- d %>%
mutate(
# Effect coding (+/-0.5) of the assigned structure condition.
structure_condition_c = case_when(
structure_condition == "model" ~ -0.5,
structure_condition == "emotion" ~ 0.5),
# Effect coding of the structure condition in effect on this trial.
cur_structure_condition_c = case_when(
cur_structure_condition == "model" ~ -0.5,
cur_structure_condition == "emotion" ~ 0.5),
match_condition_c = case_when(
match_condition == "match" ~ 0.5,
match_condition == "mismatch" ~ -0.5
),
# Dummy codings placing the reference level (coded 0) at each
# condition in turn, for simple-effects (recentered) models.
cur_structure_condition_model = case_when(
cur_structure_condition == "model" ~ 0,
cur_structure_condition == "emotion" ~ 1),
cur_structure_condition_emotion = case_when(
cur_structure_condition == "model" ~ -1,
cur_structure_condition == "emotion" ~ 0),
match_condition_match = case_when(
match_condition == "match" ~ 0,
match_condition == "mismatch" ~ -1
),
match_condition_mismatch = case_when(
match_condition == "match" ~ 1,
match_condition == "mismatch" ~ 0
),
# Effect-coded block. NOTE(review): unlike `block`, the TRUE branch
# assigns 0.5 to rows where test_trial_number is NA (non-test trials).
# Harmless for the glmers below (those rows have NA max_reward_choice
# and are dropped), but inconsistent with `block` -- confirm before
# using block_c outside the models.
block_c = case_when(
test_trial_number < 49 ~ -0.5,
TRUE ~ 0.5
),
# Dummy codings of block for learning-/generalization-centered models.
block_learn = case_when(
block==1 ~ 0,
block==2 ~ 1
),
block_gen = case_when(
block==1 ~ -1,
block==2 ~ 0
)
)
# Free-text debrief responses live on the final survey trial (index 206)
# as a raw JSON-ish string; split it into its three named fields.
open_resps <- d %>%
  filter(trial_index == 206) %>%
  select(subject, structure_condition, match_condition, response) %>%
  extract(
    response,
    into = c("patterns", "strategy", "comments"),
    regex = "patterns\":\"(.*)\",\"strategy\":\"(.*)\",\"comments\":\"(.*)"
  )
#write_csv(open_resps, here("data-analysis","data","v1","processed","emogo-v1-openresponses.csv"))
# Score the two catch trials (indices 4 and 5), where participants had
# to select a specific named image, and average accuracy per subject.
attention_check <- d %>%
  filter(trial_index %in% c(4, 5)) %>%
  mutate(
    attention_check_correct_choice = case_when(
      trial_index == 4 ~ "stimuli/horse.jpg",
      trial_index == 5 ~ "stimuli/hammer.jpg"
    ),
    check_correct = if_else(choiceImage == attention_check_correct_choice, 1, 0)
  ) %>%
  group_by(subject) %>%
  summarize(
    N = n(),
    avg_correct = mean(check_correct)
  )
# Subjects who answered every catch trial correctly.
passed_attention_check <-
  attention_check$subject[which(attention_check$avg_correct == 1)]
# Total session duration per subject, taken from the final trial.
total_time <- d %>%
  filter(trial_index == 206) %>%
  distinct(subject, time_elapsed) %>%
  mutate(time_mins = time_elapsed / 1000 / 60)
# Minimum completion time (minutes)
min(total_time$time_mins)
## [1] 5.708983
#Any subjects with times under 4 minutes?
subjects_too_fast <- total_time %>%
filter(time_mins<4)
subjects_too_fast %>%
pull(subject)
## character(0)
# Proportion of test-trial choices made at each screen location, per
# subject. `.groups = "drop_last"` makes the default grouping explicit
# (result stays grouped by subject, so freq is computed within subject)
# and silences the "has grouped output" message.
percent_location_selections <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(subject, choiceLocation) %>%
  summarise(n = n(), .groups = "drop_last") %>%
  mutate(freq = n / sum(n))
## `summarise()` has grouped output by 'subject'. You can override using the
## `.groups` argument.
# Exclusion criterion: any subject choosing one location on more than
# 80% of test trials?
subjects_same_location_exclusion <- percent_location_selections %>%
  filter(freq > 0.8)
unique(subjects_same_location_exclusion$subject)
## [1] "p911757"
# Final score per subject (score after the last test trial, #96),
# restricted to subjects who passed the attention check.
reward_rank <- d %>%
  filter(subject %in% passed_attention_check,
         test_trial_number == 96) %>%
  select(subject, structure_condition, match_condition, score_after_trial)
# Distribution of final scores, with the median marked.
median_score <- median(reward_rank$score_after_trial)
ggplot(reward_rank, aes(x = score_after_trial)) +
  geom_histogram() +
  geom_vline(xintercept = median_score)
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
# Final-score densities split by match condition, faceted by structure.
ggplot(reward_rank, aes(x = score_after_trial, color = match_condition)) +
  geom_density() +
  facet_wrap(~structure_condition)
# Top-50 scoring subjects.
# BUG FIX: the original used arrange(desc(reward_rank)) -- sorting by
# the data-frame object itself rather than by score -- so the "top 50"
# were not the highest scorers. Sort on the score column instead.
# NOTE: slice_head(n = 50) breaks score ties by row order; use
# slice_max(score_after_trial, n = 50) if ties should be kept together.
subjects_top_50 <- reward_rank %>%
  arrange(desc(score_after_trial)) %>%
  slice_head(n = 50)
write_csv(subjects_top_50,here("data-analysis","data","spark-personality","processed","subjects_top_50.csv"))
# Condition composition of the top-50 group.
conditions_top_50 <- reward_rank %>%
  filter(subject %in% subjects_top_50$subject) %>%
  group_by(structure_condition,match_condition) %>%
  tally()
conditions_top_50
## # A tibble: 4 × 3
## # Groups: structure_condition [2]
## structure_condition match_condition n
## <chr> <chr> <int>
## 1 emotion match 10
## 2 emotion mismatch 10
## 3 model match 16
## 4 model mismatch 14
#exclude any participants who meet exclusion criteria
# BUG FIX: subjects_same_location_exclusion and subjects_too_fast are
# data frames, and `subject %in% <data.frame>` never matches subject
# IDs, so those two filters were silent no-ops (e.g. "p911757" was not
# excluded). Compare against the subject column instead.
d <- d %>%
  filter(subject %in% passed_attention_check) %>%
  filter(!(subject %in% subjects_same_location_exclusion$subject)) %>%
  filter(!(subject %in% subjects_too_fast$subject))
# Subjects per condition cell after exclusions.
d %>%
  distinct(subject, structure_condition, match_condition) %>%
  group_by(structure_condition, match_condition) %>%
  tally()
## # A tibble: 4 × 3
## # Groups: structure_condition [2]
## structure_condition match_condition n
## <chr> <chr> <int>
## 1 emotion match 50
## 2 emotion mismatch 50
## 3 model match 50
## 4 model mismatch 48
# Per-subject proportion of reward-maximizing choices in each 8-trial
# exploration block. `.groups = "drop"` returns an ungrouped tibble and
# silences the grouping message; every downstream use either regroups
# (summarize_by_block*) or ignores grouping (ggplot).
subject_by_block <- d %>%
  filter(!is.na(explore_block)) %>%
  group_by(subject, match_condition, structure_condition, explore_block) %>%
  summarize(
    max_choice_percent = mean(max_reward_choice),
    .groups = "drop"
  )
## `summarise()` has grouped output by 'subject', 'match_condition',
## 'structure_condition'. You can override using the `.groups` argument.
# Block-level mean of reward-maximizing choice rate across subjects,
# with standard error and 95% CI bounds.
summarize_by_block <- subject_by_block %>%
  group_by(explore_block) %>%
  summarize(
    N = n(),
    max_choice = mean(max_choice_percent),
    se = sqrt(var(max_choice_percent, na.rm = TRUE)/N),
    ci = qt(0.975, N-1)*sd(max_choice_percent,na.rm=TRUE)/sqrt(N),
    lower_ci = max_choice - ci,
    upper_ci = max_choice + ci,
    lower_se = max_choice - se,
    upper_se = max_choice + se
  )
# Same block-level summary, split by match and structure condition.
# .groups = "drop_last" is the summarise() default, made explicit to
# silence the grouping message.
summarize_by_block_by_condition <- subject_by_block %>%
  group_by(match_condition, structure_condition, explore_block) %>%
  summarize(
    N = n(),
    max_choice = mean(max_choice_percent),
    se = sqrt(var(max_choice_percent, na.rm = TRUE)/N),
    ci = qt(0.975, N-1)*sd(max_choice_percent,na.rm=TRUE)/sqrt(N),
    lower_ci = max_choice - ci,
    upper_ci = max_choice + ci,
    lower_se = max_choice - se,
    upper_se = max_choice + se,
    .groups = "drop_last"
  )
## `summarise()` has grouped output by 'match_condition', 'structure_condition'.
## You can override using the `.groups` argument.
# Per-trial proportion of choices at each reward level (8/6/4/2),
# reshaped to long format for plotting. .groups = "drop_last" matches
# the summarise() default and silences the grouping message.
summarize_choice_by_trial <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(structure_condition, match_condition, test_trial_number) %>%
  summarize(
    N = n(),
    reward_8 = mean(reward_score_unadjusted==8),
    reward_6 = mean(reward_score_unadjusted==6),
    reward_4 = mean(reward_score_unadjusted==4),
    reward_2 = mean(reward_score_unadjusted==2),
    .groups = "drop_last"
  ) %>%
  pivot_longer(
    cols = c(reward_8, reward_6, reward_4, reward_2),
    names_to = "reward",
    values_to = "percent_choice",
    names_prefix = "reward_"
  )
## `summarise()` has grouped output by 'structure_condition', 'match_condition'.
## You can override using the `.groups` argument.
# Same reward-level choice proportions, aggregated by 8-trial block.
summarize_choice_by_block <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(structure_condition, match_condition, explore_block) %>%
  summarize(
    N = n(),
    reward_8 = mean(reward_score_unadjusted==8),
    reward_6 = mean(reward_score_unadjusted==6),
    reward_4 = mean(reward_score_unadjusted==4),
    reward_2 = mean(reward_score_unadjusted==2),
    .groups = "drop_last"
  ) %>%
  pivot_longer(
    cols = c(reward_8, reward_6, reward_4, reward_2),
    names_to = "reward",
    values_to = "percent_choice",
    names_prefix = "reward_"
  )
## `summarise()` has grouped output by 'structure_condition', 'match_condition'.
## You can override using the `.groups` argument.
# Per-subject proportion of maximizing choices within each block,
# with a readable block label for faceting.
summarize_choice_by_subject <- d %>%
  filter(!is.na(test_trial_number)) %>%
  group_by(subject, cur_structure_condition, match_condition, block) %>%
  summarize(
    N = n(),
    reward_8 = mean(reward_score_unadjusted==8),
    .groups = "drop_last"
  ) %>%
  mutate(
    block_name = case_when(
      block==1 ~ "learning block",
      block==2 ~ "generalization block"
    )
  )
## `summarise()` has grouped output by 'subject', 'cur_structure_condition',
## 'match_condition'. You can override using the `.groups` argument.
# Order the facet levels: learning block first.
summarize_choice_by_subject <- summarize_choice_by_subject %>%
  mutate(block_name = factor(block_name, levels = c("learning block", "generalization block")))
# Spaghetti plot of per-subject block trajectories plus the group mean.
# FIX: the `size` aesthetic for lines is deprecated since ggplot2 3.4.0
# (see the warning below); use `linewidth` on geom_line.
ggplot(subject_by_block, aes(explore_block, max_choice_percent)) +
  geom_point(size = 1.5, alpha = 0.1, aes(group = subject)) +
  geom_line(alpha = 0.1, aes(group = subject)) +
  geom_point(data = summarize_by_block, aes(y = max_choice), size = 2, color = "black") +
  geom_line(data = summarize_by_block, aes(y = max_choice), linewidth = 1.2, color = "black") +
  geom_errorbar(data = summarize_by_block, aes(y = max_choice, ymin = lower_se, ymax = upper_se), width = 0, color = "black") +
  geom_vline(xintercept = 6.5, linetype = "dotted") +  # learning/generalization boundary
  geom_hline(yintercept = 0.25, linetype = "dashed") + # chance level (4 options)
  theme(legend.position = "none") +
  scale_x_continuous(breaks = seq(1, 12)) +
  xlab("Block (8 Trials = 1 Block)") +
  ylab("Percent reward-maximizing choices")
## Warning: Using `size` aesthetic for lines was deprecated in ggplot2 3.4.0.
## Warning: Please use `linewidth` instead.
# Condition-level block trajectories with SE error bars.
# FIX: `size` on geom_line is deprecated since ggplot2 3.4.0; use
# `linewidth`.
ggplot(summarize_by_block_by_condition, aes(explore_block, max_choice, color = structure_condition, shape = match_condition, linetype = match_condition)) +
  geom_point(size = 1.5, alpha = 0.5) +
  geom_line(alpha = 0.5) +
  geom_point(aes(y = max_choice), size = 2) +
  geom_line(aes(y = max_choice), linewidth = 1.2) +
  geom_errorbar(aes(y = max_choice, ymin = lower_se, ymax = upper_se), width = 0) +
  geom_vline(xintercept = 6.5, linetype = "dotted") +  # learning/generalization boundary
  geom_hline(yintercept = 0.25, linetype = "dashed") + # chance level
  #theme(legend.position="none")+
  scale_x_continuous(breaks = seq(1, 12)) +
  xlab("Block (8 Trials = 1 Block)") +
  ylab("Percent reward-maximizing choices")
# Individual subject trajectories, faceted by condition cell.
ggplot(subject_by_block, aes(explore_block, max_choice_percent, group = subject)) +
  #geom_point(size=1.5,alpha=0.5)+
  geom_line(alpha = 0.5) +
  geom_vline(xintercept = 6.5, linetype = "dotted") +
  geom_hline(yintercept = 0.25, linetype = "dashed") +
  theme(legend.position = "none") +
  facet_wrap(~structure_condition + match_condition) +
  scale_x_continuous(breaks = seq(1, 12)) +
  xlab("Block (8 Trials = 1 Block)") +
  ylab("Percent reward-maximizing choices")
# Trial-by-trial proportion of choices at each reward level.
ggplot(summarize_choice_by_trial, aes(test_trial_number, percent_choice, color = reward)) +
  geom_point() +
  geom_line() +
  geom_vline(xintercept = 48.5, linetype = "dotted") +
  geom_hline(yintercept = 0.25, linetype = "dashed") +
  xlab("Trial Number") +
  ylab("Percent choices") +
  facet_wrap(~structure_condition + match_condition)
# Block-level proportion of choices at each reward level.
ggplot(summarize_choice_by_block, aes(explore_block, percent_choice, color = reward)) +
  geom_point() +
  geom_line() +
  geom_vline(xintercept = 6.5, linetype = "dotted") +
  geom_hline(yintercept = 0.25, linetype = "dashed") +
  xlab("Block (8 Trials = 1 Block)") +
  ylab("Percent choices") +
  facet_wrap(~structure_condition + match_condition)
# Per-subject maximizing rates by structure condition, faceted by
# match condition and block; saved to the figures directory.
ggplot(summarize_choice_by_subject, aes(cur_structure_condition, reward_8, color = cur_structure_condition)) +
  geom_boxplot() +
  geom_jitter(width = 0.1) +
  geom_hline(yintercept = 0.25, linetype = "dashed") +
  xlab("Structure Condition") +
  ylab("Percent Reward-Maximizing Choices") +
  facet_wrap(~match_condition + block_name) +
  theme_cowplot() +
  theme(legend.position = "none")
ggsave(here(figure_path, "overall_reward_maximizing.png"), width = 6, height = 6)
#### Pruning random effects structure
# Maximal random-effects structure: full 4-way fixed-effect design,
# by-subject slopes for block and centered within-block trial, and a
# random intercept for the chosen image.
# FIX: pass glmerControl() through the named `control` argument instead
# of relying on positional matching.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_c*block_trial_number_c + (1+block_c*block_trial_number_c|subject)+(1|choiceImage),
           data = d, family = binomial,
           control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_c * block_trial_number_c + (1 + block_c * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 15003.7 15215.7 -7474.9 14949.7 18981
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -28.3472 -0.3887 0.0284 0.3538 14.7563
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## choiceImage (Intercept) 5.968528 2.44306
## subject (Intercept) 4.155422 2.03849
## block_c 6.505189 2.55053 0.07
## block_trial_number_c 0.004465 0.06682 0.86 0.27
## block_c:block_trial_number_c 0.006095 0.07807 0.29 0.73 0.42
## Number of obs: 19008, groups: choiceImage, 248; subject, 198
##
## Fixed effects:
## Estimate
## (Intercept) 0.007530
## cur_structure_condition_c -1.359657
## match_condition_c 1.315749
## block_c 0.770522
## block_trial_number_c 0.075711
## cur_structure_condition_c:match_condition_c -0.665562
## cur_structure_condition_c:block_c -1.540465
## match_condition_c:block_c 2.060052
## cur_structure_condition_c:block_trial_number_c -0.054451
## match_condition_c:block_trial_number_c 0.041163
## block_c:block_trial_number_c 0.033377
## cur_structure_condition_c:match_condition_c:block_c -1.805714
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.043272
## cur_structure_condition_c:block_c:block_trial_number_c -0.036828
## match_condition_c:block_c:block_trial_number_c 0.061497
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c -0.050052
## Std. Error
## (Intercept) 0.220282
## cur_structure_condition_c 0.254123
## match_condition_c 0.300754
## block_c 0.203582
## block_trial_number_c 0.005435
## cur_structure_condition_c:match_condition_c 0.505466
## cur_structure_condition_c:block_c 0.509766
## match_condition_c:block_c 0.395350
## cur_structure_condition_c:block_trial_number_c 0.009156
## match_condition_c:block_trial_number_c 0.010698
## block_c:block_trial_number_c 0.007622
## cur_structure_condition_c:match_condition_c:block_c 1.014264
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.018125
## cur_structure_condition_c:block_c:block_trial_number_c 0.018318
## match_condition_c:block_c:block_trial_number_c 0.014709
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c 0.036247
## z value
## (Intercept) 0.034
## cur_structure_condition_c -5.350
## match_condition_c 4.375
## block_c 3.785
## block_trial_number_c 13.930
## cur_structure_condition_c:match_condition_c -1.317
## cur_structure_condition_c:block_c -3.022
## match_condition_c:block_c 5.211
## cur_structure_condition_c:block_trial_number_c -5.947
## match_condition_c:block_trial_number_c 3.848
## block_c:block_trial_number_c 4.379
## cur_structure_condition_c:match_condition_c:block_c -1.780
## cur_structure_condition_c:match_condition_c:block_trial_number_c -2.387
## cur_structure_condition_c:block_c:block_trial_number_c -2.010
## match_condition_c:block_c:block_trial_number_c 4.181
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c -1.381
## Pr(>|z|)
## (Intercept) 0.972731
## cur_structure_condition_c 8.78e-08
## match_condition_c 1.22e-05
## block_c 0.000154
## block_trial_number_c < 2e-16
## cur_structure_condition_c:match_condition_c 0.187929
## cur_structure_condition_c:block_c 0.002512
## match_condition_c:block_c 1.88e-07
## cur_structure_condition_c:block_trial_number_c 2.73e-09
## match_condition_c:block_trial_number_c 0.000119
## block_c:block_trial_number_c 1.19e-05
## cur_structure_condition_c:match_condition_c:block_c 0.075024
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.016969
## cur_structure_condition_c:block_c:block_trial_number_c 0.044381
## match_condition_c:block_c:block_trial_number_c 2.90e-05
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c 0.167321
##
## (Intercept)
## cur_structure_condition_c ***
## match_condition_c ***
## block_c ***
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c
## cur_structure_condition_c:block_c **
## match_condition_c:block_c ***
## cur_structure_condition_c:block_trial_number_c ***
## match_condition_c:block_trial_number_c ***
## block_c:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_c .
## cur_structure_condition_c:match_condition_c:block_trial_number_c *
## cur_structure_condition_c:block_c:block_trial_number_c *
## match_condition_c:block_c:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_c:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
#create predicted data
# Full design grid at the centered predictor values used by the model;
# block_trial_number_c spans trials 1-48 centered at 24.5.
pX <- expand.grid(
cur_structure_condition_c=c(-0.5,0.5),
match_condition_c=c(-0.5,0.5),
block_c=c(-0.5,0.5),
block_trial_number_c=seq(-23.5,23.5,by=1))
# Population-level predictions with SEs on the response scale.
# NOTE(review): AICcmodavg::predictSE() does not document a `re.form`
# argument -- presumably intended to suppress random effects; confirm
# it is honored rather than silently swallowed.
predictions <- predictSE(m,pX,re.form=NA, type="response")
pX$fit <- predictions$fit
pX$se.fit <- predictions$se.fit
# Attach human-readable condition labels to the prediction grid.
# Every *_c column holds exactly -0.5 or 0.5 (set by expand.grid above),
# so a two-way if_else recovers the original labels.
pX <- pX %>%
  mutate(
    block_trial_number = block_trial_number_c + 24.5,
    cur_structure_condition = if_else(cur_structure_condition_c == 0.5, "emotion", "model"),
    match_condition = if_else(match_condition_c == 0.5, "match", "mismatch"),
    block_name = if_else(block_c == 0.5, "generalization block", "learning block"),
    block_name = factor(block_name, levels = c("learning block", "generalization block"))
  )
# Mirror the block labels on the raw data, learning block first.
d <- d %>%
  mutate(
    block_name = factor(
      case_when(
        block == 1 ~ "learning block",
        block == 2 ~ "generalization block"
      ),
      levels = c("learning block", "generalization block")
    )
  )
# Raw choices (jittered points + horizontal violins) overlaid with the
# model's population-level predictions. max_reward_choice is plotted as
# a 5-level factor axis, so probabilities are mapped via fit*4 + 1.
# BUG FIX: color/fill are mapped to the character column
# cur_structure_condition ("emotion"/"model"); the original numeric
# breaks c(0.5, -0.5) matched no data values, which suppressed the
# legend keys. Use the string levels (as the later plot does).
p <- ggplot(subset(d, !is.na(block_trial_number)), aes(block_trial_number, as.factor(max_reward_choice), color = cur_structure_condition)) +
  geom_point(size = 0.5, alpha = 0.2, shape = 19, position = position_jitterdodge(jitter.width = 0.05, jitter.height = 0.5, dodge.width = 0.2, seed = 1)) +
  geom_violinh(data = subset(d, !is.na(block_trial_number) & cur_structure_condition_c == -0.5), aes(fill = cur_structure_condition), position = position_nudge(x = 0, y = .3), scale = "count", width = 0.4, alpha = 0.3, color = NA) +
  geom_violinh(data = subset(d, !is.na(block_trial_number) & cur_structure_condition_c == 0.5), aes(fill = cur_structure_condition), position = position_nudge(x = 0, y = -.3), scale = "count", width = 0.4, alpha = 0.3, color = NA) +
  geom_hline(yintercept = 0.25*4+1, linetype = "dashed") +  # chance level on the factor axis
  geom_smooth(data = pX, aes(y = fit*4+1, ymax = (fit+se.fit)*4+1, ymin = (fit-se.fit)*4+1, fill = cur_structure_condition), stat = "identity") +
  theme_classic(base_size = 18) +
  ylab("Probability of \nreward-maximizing choice") +
  scale_color_brewer(
    palette = "Set1",
    name = "Structure Condition",
    breaks = c("emotion", "model"),
    labels = c("Emotion", "Model")) +
  scale_fill_brewer(
    palette = "Set1",
    name = "Structure Condition",
    breaks = c("emotion", "model"),
    labels = c("Emotion", "Model")) +
  scale_y_discrete(limits = c("0", "0.25", "0.5", "0.75", "1")) +
  xlab("Block Trial Number") +
  facet_wrap(~match_condition + block_name) +
  theme(legend.position = c(0.4, 0.4))
p
## Warning: Using the `size` aesthetic with geom_polygon was deprecated in ggplot2
## 3.4.0.
## Warning: Please use the `linewidth` aesthetic instead.
# Save the combined learning + generalization prediction figure.
ggsave(here(figure_path, "model_predictions.png"), width = 9, height = 6)
# Same raw-data + model-prediction plot, restricted to the
# generalization block (block 2). Probabilities are mapped onto the
# 5-level factor axis via fit*4 + 1, as above.
p <- ggplot(subset(d,!is.na(block_trial_number)&block==2),aes(block_trial_number,as.factor(max_reward_choice),color=cur_structure_condition))+
geom_point(size = 0.5, alpha=0.1,shape=19,position = position_jitterdodge(jitter.width = 0.05,jitter.height = 0.5,dodge.width = 0.2,seed = 1))+
# Horizontal count-scaled violins for each structure condition, nudged
# above/below the point rows.
geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==-0.5&block==2),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = .3 ),scale="count",width=0.4,alpha=0.2,color=NA)+
geom_violinh(data=subset(d,!is.na(block_trial_number)&cur_structure_condition_c==0.5&block==2),aes(fill=cur_structure_condition),position = position_nudge(x = 0, y = -.3 ),scale="count",width=0.4,alpha=0.2,color=NA)+
# Chance level (0.25) on the factor axis.
geom_hline(yintercept=0.25*4+1,linetype="dashed")+
geom_smooth(data=filter(pX,block_name=="generalization block"),aes(y=fit*4+1,ymax=(fit+se.fit)*4+1,ymin=(fit-se.fit)*4+1,fill=cur_structure_condition),stat="identity")+
theme_classic(base_size=18)+
ylab("Probability of \nreward-maximizing choice")+
scale_color_brewer(
palette="Set1",
name="Structure Condition",
breaks=c("model","emotion"),
labels=c("Model","Emotion"))+
scale_fill_brewer(
palette="Set1",
name="Structure Condition",
breaks=c("model","emotion"),
labels=c("Model","Emotion"))+
scale_y_discrete(limits=c("0","0.25","0.5","0.75","1"))+
xlab("Block Trial Number")+
facet_wrap(~match_condition)+
theme(legend.position=c(0.39,0.45),legend.title=element_text(size=14),legend.text=element_text(size=12),legend.background=element_rect(fill =NA))
p
ggsave(here(figure_path,"model_prediction_generalization_only.pdf"),width=9,height=6)
Recenter the model on the learning block
# Same maximal model re-centered on the learning block (block_learn = 0
# there), so lower-order terms are simple effects within that block.
# FIX: pass glmerControl() through the named `control` argument.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_learn*block_trial_number_c+ (1+block_learn*block_trial_number_c|subject)+(1|choiceImage),
           data = d, family = binomial,
           control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_learn * block_trial_number_c + (1 + block_learn * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 15003.7 15215.7 -7474.9 14949.7 18981
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -28.3477 -0.3887 0.0284 0.3538 14.7563
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## choiceImage (Intercept) 5.968539 2.44306
## subject (Intercept) 5.434544 2.33121
## block_learn 6.505286 2.55055 -0.49
## block_trial_number_c 0.003787 0.06154 0.75 -0.17
## block_learn:block_trial_number_c 0.006096 0.07807 -0.14 0.73
##
##
##
##
##
## -0.18
## Number of obs: 19008, groups: choiceImage, 248; subject, 198
##
## Fixed effects:
## Estimate
## (Intercept) -0.377726
## cur_structure_condition_c -0.589517
## match_condition_c 0.285730
## block_learn 0.770523
## block_trial_number_c 0.059023
## cur_structure_condition_c:match_condition_c 0.237557
## cur_structure_condition_c:block_learn -1.540380
## match_condition_c:block_learn 2.060016
## cur_structure_condition_c:block_trial_number_c -0.036040
## match_condition_c:block_trial_number_c 0.010414
## block_learn:block_trial_number_c 0.033378
## cur_structure_condition_c:match_condition_c:block_learn -1.806403
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.018242
## cur_structure_condition_c:block_learn:block_trial_number_c -0.036826
## match_condition_c:block_learn:block_trial_number_c 0.061497
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c -0.050071
## Std. Error
## (Intercept) 0.234960
## cur_structure_condition_c 0.341949
## match_condition_c 0.340990
## block_learn 0.203565
## block_trial_number_c 0.005188
## cur_structure_condition_c:match_condition_c 0.682228
## cur_structure_condition_c:block_learn 0.509740
## match_condition_c:block_learn 0.395277
## cur_structure_condition_c:block_trial_number_c 0.010303
## match_condition_c:block_trial_number_c 0.010250
## block_learn:block_trial_number_c 0.007622
## cur_structure_condition_c:match_condition_c:block_learn 1.014989
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.020498
## cur_structure_condition_c:block_learn:block_trial_number_c 0.018318
## match_condition_c:block_learn:block_trial_number_c 0.014707
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c 0.036263
## z value
## (Intercept) -1.608
## cur_structure_condition_c -1.724
## match_condition_c 0.838
## block_learn 3.785
## block_trial_number_c 11.377
## cur_structure_condition_c:match_condition_c 0.348
## cur_structure_condition_c:block_learn -3.022
## match_condition_c:block_learn 5.212
## cur_structure_condition_c:block_trial_number_c -3.498
## match_condition_c:block_trial_number_c 1.016
## block_learn:block_trial_number_c 4.379
## cur_structure_condition_c:match_condition_c:block_learn -1.780
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.890
## cur_structure_condition_c:block_learn:block_trial_number_c -2.010
## match_condition_c:block_learn:block_trial_number_c 4.181
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c -1.381
## Pr(>|z|)
## (Intercept) 0.107919
## cur_structure_condition_c 0.084709
## match_condition_c 0.402063
## block_learn 0.000154
## block_trial_number_c < 2e-16
## cur_structure_condition_c:match_condition_c 0.727685
## cur_structure_condition_c:block_learn 0.002512
## match_condition_c:block_learn 1.87e-07
## cur_structure_condition_c:block_trial_number_c 0.000469
## match_condition_c:block_trial_number_c 0.309636
## block_learn:block_trial_number_c 1.19e-05
## cur_structure_condition_c:match_condition_c:block_learn 0.075121
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.373499
## cur_structure_condition_c:block_learn:block_trial_number_c 0.044388
## match_condition_c:block_learn:block_trial_number_c 2.90e-05
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c 0.167347
##
## (Intercept)
## cur_structure_condition_c .
## match_condition_c
## block_learn ***
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c
## cur_structure_condition_c:block_learn **
## match_condition_c:block_learn ***
## cur_structure_condition_c:block_trial_number_c ***
## match_condition_c:block_trial_number_c
## block_learn:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_learn .
## cur_structure_condition_c:match_condition_c:block_trial_number_c
## cur_structure_condition_c:block_learn:block_trial_number_c *
## match_condition_c:block_learn:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_learn:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
Recenter the model on the generalization block
# Same maximal model re-centered on the generalization block
# (block_gen = 0 there), so lower-order terms are simple effects
# within that block.
# FIX: pass glmerControl() through the named `control` argument.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_gen*block_trial_number_c+ (1+block_gen*block_trial_number_c|subject)+(1|choiceImage),
           data = d, family = binomial,
           control = glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 100000)))
summary(m)
## Generalized linear mixed model fit by maximum likelihood (Laplace
## Approximation) [glmerMod]
## Family: binomial ( logit )
## Formula: max_reward_choice ~ cur_structure_condition_c * match_condition_c *
## block_gen * block_trial_number_c + (1 + block_gen * block_trial_number_c |
## subject) + (1 | choiceImage)
## Data: d
## Control: glmerControl(optimizer = "bobyqa", optCtrl = list(maxfun = 1e+05))
##
## AIC BIC logLik deviance df.resid
## 15003.7 15215.7 -7474.9 14949.7 18981
##
## Scaled residuals:
## Min 1Q Median 3Q Max
## -28.3468 -0.3887 0.0284 0.3538 14.7563
##
## Random effects:
## Groups Name Variance Std.Dev. Corr
## choiceImage (Intercept) 5.968462 2.44304
## subject (Intercept) 6.128639 2.47561
## block_gen 6.505007 2.55049 0.57
## block_trial_number_c 0.008190 0.09050 0.89 0.52
## block_gen:block_trial_number_c 0.006095 0.07807 0.62 0.73 0.74
## Number of obs: 19008, groups: choiceImage, 248; subject, 198
##
## Fixed effects:
## Estimate
## (Intercept) 0.392771
## cur_structure_condition_c -2.129875
## match_condition_c 2.345738
## block_gen 0.770484
## block_trial_number_c 0.092399
## cur_structure_condition_c:match_condition_c -1.568222
## cur_structure_condition_c:block_gen -1.540506
## match_condition_c:block_gen 2.059990
## cur_structure_condition_c:block_trial_number_c -0.072865
## match_condition_c:block_trial_number_c 0.071909
## block_gen:block_trial_number_c 0.033376
## cur_structure_condition_c:match_condition_c:block_gen -1.805598
## cur_structure_condition_c:match_condition_c:block_trial_number_c -0.068291
## cur_structure_condition_c:block_gen:block_trial_number_c -0.036829
## match_condition_c:block_gen:block_trial_number_c 0.061495
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c -0.050046
## Std. Error
## (Intercept) 0.250101
## cur_structure_condition_c 0.376739
## match_condition_c 0.377801
## block_gen 0.203551
## block_trial_number_c 0.007823
## cur_structure_condition_c:match_condition_c 0.746908
## cur_structure_condition_c:block_gen 0.509152
## match_condition_c:block_gen 0.395253
## cur_structure_condition_c:block_trial_number_c 0.015134
## match_condition_c:block_trial_number_c 0.015228
## block_gen:block_trial_number_c 0.007621
## cur_structure_condition_c:match_condition_c:block_gen 1.010815
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.029856
## cur_structure_condition_c:block_gen:block_trial_number_c 0.018303
## match_condition_c:block_gen:block_trial_number_c 0.014706
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c 0.036171
## z value
## (Intercept) 1.570
## cur_structure_condition_c -5.653
## match_condition_c 6.209
## block_gen 3.785
## block_trial_number_c 11.811
## cur_structure_condition_c:match_condition_c -2.100
## cur_structure_condition_c:block_gen -3.026
## match_condition_c:block_gen 5.212
## cur_structure_condition_c:block_trial_number_c -4.815
## match_condition_c:block_trial_number_c 4.722
## block_gen:block_trial_number_c 4.379
## cur_structure_condition_c:match_condition_c:block_gen -1.786
## cur_structure_condition_c:match_condition_c:block_trial_number_c -2.287
## cur_structure_condition_c:block_gen:block_trial_number_c -2.012
## match_condition_c:block_gen:block_trial_number_c 4.182
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c -1.384
## Pr(>|z|)
## (Intercept) 0.116311
## cur_structure_condition_c 1.57e-08
## match_condition_c 5.34e-10
## block_gen 0.000154
## block_trial_number_c < 2e-16
## cur_structure_condition_c:match_condition_c 0.035762
## cur_structure_condition_c:block_gen 0.002481
## match_condition_c:block_gen 1.87e-07
## cur_structure_condition_c:block_trial_number_c 1.47e-06
## match_condition_c:block_trial_number_c 2.33e-06
## block_gen:block_trial_number_c 1.19e-05
## cur_structure_condition_c:match_condition_c:block_gen 0.074054
## cur_structure_condition_c:match_condition_c:block_trial_number_c 0.022174
## cur_structure_condition_c:block_gen:block_trial_number_c 0.044200
## match_condition_c:block_gen:block_trial_number_c 2.89e-05
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c 0.166489
##
## (Intercept)
## cur_structure_condition_c ***
## match_condition_c ***
## block_gen ***
## block_trial_number_c ***
## cur_structure_condition_c:match_condition_c *
## cur_structure_condition_c:block_gen **
## match_condition_c:block_gen ***
## cur_structure_condition_c:block_trial_number_c ***
## match_condition_c:block_trial_number_c ***
## block_gen:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_gen .
## cur_structure_condition_c:match_condition_c:block_trial_number_c *
## cur_structure_condition_c:block_gen:block_trial_number_c *
## match_condition_c:block_gen:block_trial_number_c ***
## cur_structure_condition_c:match_condition_c:block_gen:block_trial_number_c
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Correlation matrix not shown by default, as p = 16 > 12.
## Use print(x, correlation=TRUE) or
## vcov(x) if you need it
Not evaluated for spark-personality.
Trying to sort out how to look at individual conditions and blocks.
# Exploratory simple-effects models (not evaluated for this dataset).
# Structure-by-trial, collapsing over match and block:
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Match-by-trial-by-block, ignoring structure:
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c*block_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Match-by-trial within the generalization block only:
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial)
summary(m)
# Match-by-trial within the learning block only:
m <- glmer(max_reward_choice ~ match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
summary(m)#not really sure why there should be a match effect here... Noise?
## learning block
# m <- glmer(max_reward_choice ~ structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
# summary(m)
#
# # re-run so I don't have to re-label things
# m <- glmer(max_reward_choice ~ structure_condition*block_trial_number+ (1+block_trial_number|subject)+(1|choiceImage),data=filter(d,block==1), family=binomial)
# summary(m)
#
# sjPlot::plot_model(m, type = "pred", terms = c("block_trial_number", "structure_condition"),
# show.data = T,
# jitter = .01,
# title = "",
# axis.title = c("Trial", "Reward Maximizing Choices"),
# legend.title = "Structure Condition",
# colors = c( "orange3", "green4"),
# auto.label = FALSE)+
# theme_classic(base_size = 14, base_family = "")
# overall (same model as in section above)
# Structure-by-trial across both blocks:
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=d, family=binomial)
summary(m)
# Structure-by-trial within the generalization block only:
m <- glmer(max_reward_choice ~ cur_structure_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial)
summary(m)
# generalization block
# Full structure x match x trial model within the generalization block.
m <- glmer(max_reward_choice ~ cur_structure_condition_c*match_condition_c*block_trial_number_c+ (1+block_trial_number_c|subject)+(1|choiceImage),data=filter(d,block==2), family=binomial,glmerControl(optimizer="bobyqa",optCtrl=list(maxfun=20000)))
summary(m)
# Predicted probabilities: x = trial, grouping = match, facet = structure.
# NOTE(review): the plot `title` is set to "Structure Condition" while
# the legend (second `terms` entry) is match_condition_c -- presumably
# the title was meant to label the facets; confirm intent.
sjPlot::plot_model(m, type = "pred", terms = c("block_trial_number_c", "match_condition_c","cur_structure_condition_c"),
jitter = .01,
title = "Structure Condition",
axis.title = c("Trial", "Reward Maximizing Choices"),
legend.title = "Match Condition",
colors = c( "firebrick", "darkblue")) +
theme_classic(base_size = 14, base_family = "")